Assay panel labelled 1

Load the Data

knitr::opts_chunk$set(echo = TRUE, fig.width = 8, fig.height = 6)
#######################
# ASSAY 1
#######################
# Read the background-corrected fluorescence intensities for assay 1.
# readxl deduplicates the repeated "FI - Bkgd" headers on its own
# ("FI - Bkgd...4", "FI - Bkgd...5", ...); proper names are assigned below.
file_path <- here("R&D3plex_LX200_20240222_1.xlsx")
data <- read_excel(file_path, sheet = "FI - Bkgd", range = "A62:F156")

# Assign analysis-friendly column names.
names(data) <- c("Type", "Well", "Description", "IL1b", "IL6", "TNFa")

# The first 14 rows arrive without a Description (presumably the
# standards -- confirm against the plate layout); fall back to Type there.
data <- data %>%
  mutate(
    Description = ifelse(row_number() <= 14 & is.na(Description),
                         Type,
                         Description)
  )

# Quick visual check of the cleaned table.
head(data, 20)

# Read the expected standard concentrations. The range has no header row,
# so readxl emits placeholder names ("...1", ...) which are replaced below.
exp_conc <- read_excel(file_path, sheet = "Exp Conc", range = "D65:F76", col_names = FALSE)
colnames(exp_conc) <- c("IL1b", "IL6", "TNFa")

# Extract the standard-curve rows (Type values starting with "S").
standard_data <- data %>%
  filter(grepl("^S", Type))

# The attach below is purely positional: row i of exp_conc must line up
# with the i-th standard row. Fail fast with a clear error if the counts
# disagree (e.g. the Excel range drifted between plate exports).
stopifnot(nrow(standard_data) == nrow(exp_conc))

# Attach the expected concentrations alongside the observed intensities.
standard_data <- standard_data %>%
  mutate(IL1b_exp = exp_conc$IL1b,
         IL6_exp = exp_conc$IL6,
         TNFa_exp = exp_conc$TNFa)

Calculate Mean and %CV for Replicates

Interpretation: the standard curve fits very poorly at the lower end of the expected concentration range.

Use weighting strategy to address poor fit at lower end of concentration range

Back-calculate standard curve observed values with weighted model

Predict values for unknown samples

Interpolate values for samples flagged as "out of range"

Calculate means and multiply by 30

knitr::opts_chunk$set(echo = TRUE, fig.width = 8, fig.height = 6)

# Samples were diluted 1:30 before the assay; means are scaled back up
# by this factor. Named once so the magic number lives in one place.
dilution_factor <- 30

# Collapse technical duplicates per sample (Description): mean per analyte,
# plus a range flag that marks the sample "Out of Range" if ANY replicate was.
# NOTE(review): if a Check_* value is NA, any() can return NA and the flag
# propagates as NA -- confirm upstream guarantees no missing flags.
mean_duplicates <- reportable_data %>%
  group_by(Description) %>%
  summarise(
    Mean_IL1b  = mean(report_IL1b) * dilution_factor,
    Mean_IL6   = mean(report_IL6)  * dilution_factor,
    Mean_TNFa  = mean(report_TNFa) * dilution_factor,
    Check_IL1b = ifelse(any(Check_IL1b == "Out of Range"), "Out of Range", "In Range"),
    Check_IL6  = ifelse(any(Check_IL6 == "Out of Range"), "Out of Range", "In Range"),
    Check_TNFa = ifelse(any(Check_TNFa == "Out of Range"), "Out of Range", "In Range"),
    # Explicitly drop the grouping (same result as the default here,
    # but silences the regrouping message and documents intent).
    .groups = "drop"
  )

# Display the mean of the duplicates
# mean_duplicates

Save results

# Write the raw values for the unknown samples to disk.
unknown_data %>% write.csv(here("unknown_raw_values.csv"), row.names = FALSE)

# Export the duplicate means twice: RDS for downstream R sessions,
# CSV for spreadsheet consumers.
mean_duplicates %>% readr::write_rds(here("luminex_results.rds"))
mean_duplicates %>% write.csv(here("luminex_results.csv"), row.names = FALSE)

Assay panel labelled 2

Load the Data

knitr::opts_chunk$set(echo = TRUE, fig.width = 8, fig.height = 6)

#######################
# ASSAY 2
#######################
# Read the background-corrected fluorescence intensities for assay 2.
# readxl deduplicates the repeated "FI - Bkgd" headers on its own
# ("FI - Bkgd...4", "FI - Bkgd...5", ...); proper names are assigned below.
file_path_2 <- here("R&D3plex_LX200_20240222_2.xlsx")
data_2 <- read_excel(file_path_2, sheet = "FI - Bkgd", range = "A61:F153")

# Assign analysis-friendly column names.
names(data_2) <- c("Type", "Well", "Description", "IL1b", "IL6", "TNFa")

# The first 14 rows arrive without a Description (presumably the
# standards -- confirm against the plate layout); fall back to Type there.
data_2 <- data_2 %>%
  mutate(
    Description = ifelse(row_number() <= 14 & is.na(Description),
                         Type,
                         Description)
  )

# Quick visual check of the cleaned table.
head(data_2, 20)

# Read the expected standard concentrations. The range has no header row,
# so readxl emits placeholder names ("...1", ...) which are replaced below.
exp_conc_2 <- read_excel(file_path_2, sheet = "Exp Conc", range = "D64:F75", col_names = FALSE)
colnames(exp_conc_2) <- c("IL1b", "IL6", "TNFa")

# Extract the standard-curve rows (Type values starting with "S").
standard_data_2 <- data_2 %>%
  filter(grepl("^S", Type))

# The attach below is purely positional: row i of exp_conc_2 must line up
# with the i-th standard row. Fail fast with a clear error if the counts
# disagree (e.g. the Excel range drifted between plate exports).
stopifnot(nrow(standard_data_2) == nrow(exp_conc_2))

# Attach the expected concentrations alongside the observed intensities.
standard_data_2 <- standard_data_2 %>%
  mutate(IL1b_exp = exp_conc_2$IL1b,
         IL6_exp = exp_conc_2$IL6,
         TNFa_exp = exp_conc_2$TNFa)

Calculate Mean and %CV for Replicates

Interpretation: the standard curve fits very poorly at the lower end of the expected concentration range.

Use weighting strategy to address poor fit at lower end of concentration range

Back-calculate standard curve observed values with weighted model

Predict values for unknown samples

Interpolate values for samples flagged as "out of range"

Calculate means and multiply by 30

knitr::opts_chunk$set(echo = TRUE, fig.width = 8, fig.height = 6)

# Samples were diluted 1:30 before the assay; means are scaled back up
# by this factor. Named once so the magic number lives in one place.
dilution_factor_2 <- 30

# Collapse technical duplicates per sample (Description): mean per analyte,
# plus a range flag that marks the sample "Out of Range" if ANY replicate was.
# NOTE(review): if a Check_* value is NA, any() can return NA and the flag
# propagates as NA -- confirm upstream guarantees no missing flags.
mean_duplicates_2 <- reportable_data_2 %>%
  group_by(Description) %>%
  summarise(
    Mean_IL1b  = mean(report_IL1b) * dilution_factor_2,
    Mean_IL6   = mean(report_IL6)  * dilution_factor_2,
    Mean_TNFa  = mean(report_TNFa) * dilution_factor_2,
    Check_IL1b = ifelse(any(Check_IL1b == "Out of Range"), "Out of Range", "In Range"),
    Check_IL6  = ifelse(any(Check_IL6 == "Out of Range"), "Out of Range", "In Range"),
    Check_TNFa = ifelse(any(Check_TNFa == "Out of Range"), "Out of Range", "In Range"),
    # Explicitly drop the grouping (same result as the default here,
    # but silences the regrouping message and documents intent).
    .groups = "drop"
  )

# Display the mean of the duplicates
# mean_duplicates_2

Save results

# Write the raw values for the unknown samples to disk.
unknown_data_2 %>% write.csv(here("unknown_raw_values_2.csv"), row.names = FALSE)

# Export the duplicate means twice: RDS for downstream R sessions,
# CSV for spreadsheet consumers.
mean_duplicates_2 %>% readr::write_rds(here("luminex_results_2.rds"))
mean_duplicates_2 %>% write.csv(here("luminex_results_2.csv"), row.names = FALSE)

Assay panel labelled 3

Load the Data

knitr::opts_chunk$set(echo = TRUE, fig.width = 8, fig.height = 6)

#######################
# ASSAY 3
#######################
# Read the background-corrected fluorescence intensities for assay 3
# (run on a later date than assays 1 and 2 -- note the filename).
# readxl deduplicates the repeated "FI - Bkgd" headers on its own
# ("FI - Bkgd...4", "FI - Bkgd...5", ...); proper names are assigned below.
file_path_3 <- here("R&D3plex_LX200_20240226_3.xlsx")
data_3 <- read_excel(file_path_3, sheet = "FI - Bkgd", range = "A63:F159")

# Assign analysis-friendly column names.
names(data_3) <- c("Type", "Well", "Description", "IL1b", "IL6", "TNFa")

# The first 14 rows arrive without a Description (presumably the
# standards -- confirm against the plate layout); fall back to Type there.
data_3 <- data_3 %>%
  mutate(
    Description = ifelse(row_number() <= 14 & is.na(Description),
                         Type,
                         Description)
  )

# Quick visual check of the cleaned table.
head(data_3, 20)

# Read the expected standard concentrations. The range has no header row,
# so readxl emits placeholder names ("...1", ...) which are replaced below.
exp_conc_3 <- read_excel(file_path_3, sheet = "Exp Conc", range = "D66:F77", col_names = FALSE)
colnames(exp_conc_3) <- c("IL1b", "IL6", "TNFa")

# Extract the standard-curve rows (Type values starting with "S").
standard_data_3 <- data_3 %>%
  filter(grepl("^S", Type))

# The attach below is purely positional: row i of exp_conc_3 must line up
# with the i-th standard row. Fail fast with a clear error if the counts
# disagree (e.g. the Excel range drifted between plate exports).
stopifnot(nrow(standard_data_3) == nrow(exp_conc_3))

# Attach the expected concentrations alongside the observed intensities.
standard_data_3 <- standard_data_3 %>%
  mutate(IL1b_exp = exp_conc_3$IL1b,
         IL6_exp = exp_conc_3$IL6,
         TNFa_exp = exp_conc_3$TNFa)

Calculate Mean and %CV for Replicates

Use weighting strategy to address poor fit at lower end of concentration range

Back-calculate standard curve observed values with weighted model

Predict values for unknown samples

Interpolate values for samples flagged as "out of range"

Calculate means and multiply by 30

knitr::opts_chunk$set(echo = TRUE, fig.width = 8, fig.height = 6)

# Samples were diluted 1:30 before the assay; means are scaled back up
# by this factor. Named once so the magic number lives in one place.
dilution_factor_3 <- 30

# Collapse technical duplicates per sample (Description): mean per analyte,
# plus a range flag that marks the sample "Out of Range" if ANY replicate was.
# NOTE(review): if a Check_* value is NA, any() can return NA and the flag
# propagates as NA -- confirm upstream guarantees no missing flags.
mean_duplicates_3 <- reportable_data_3 %>%
  group_by(Description) %>%
  summarise(
    Mean_IL1b  = mean(report_IL1b) * dilution_factor_3,
    Mean_IL6   = mean(report_IL6)  * dilution_factor_3,
    Mean_TNFa  = mean(report_TNFa) * dilution_factor_3,
    Check_IL1b = ifelse(any(Check_IL1b == "Out of Range"), "Out of Range", "In Range"),
    Check_IL6  = ifelse(any(Check_IL6 == "Out of Range"), "Out of Range", "In Range"),
    Check_TNFa = ifelse(any(Check_TNFa == "Out of Range"), "Out of Range", "In Range"),
    # Explicitly drop the grouping (same result as the default here,
    # but silences the regrouping message and documents intent).
    .groups = "drop"
  )

# Display the mean of the duplicates
# mean_duplicates_3

Save results

# Write the raw values for the unknown samples to disk.
unknown_data_3 %>% write.csv(here("unknown_raw_values_3.csv"), row.names = FALSE)

# Export the duplicate means twice: RDS for downstream R sessions,
# CSV for spreadsheet consumers.
mean_duplicates_3 %>% readr::write_rds(here("luminex_results_3.rds"))
mean_duplicates_3 %>% write.csv(here("luminex_results_3.csv"), row.names = FALSE)